Filter criteria:

glue('Number of genes: ', nrow(datExpr), '\n',
     'Number of samples: ', ncol(datExpr), ' (', sum(datMeta$Diagnosis_=='ASD'), ' ASD, ',
     sum(datMeta$Diagnosis_!='ASD'), ' controls)')
## Number of genes: 6417
## Number of samples: 86 (51 ASD, 35 controls)

Dimensionality reduction using PCA

reduce_dim_datExpr = function(datExpr, datMeta, var_explained=0.8, filter_controls=FALSE){

  datExpr = data.frame(datExpr)
  
  # Optionally keep only the ASD samples
  if(filter_controls){
    datMeta = datMeta %>% filter(Diagnosis_=='ASD')
    datExpr = datExpr %>% select(paste0('X', datMeta$Dissected_Sample_ID))
  }
  
  # PCA on samples: plot the proportion of variance explained by each component
  # (left) and the cumulative proportion with the chosen threshold (right)
  datExpr_pca = prcomp(t(datExpr), scale=TRUE)
  par(mfrow=c(1,2))
  plot(summary(datExpr_pca)$importance[2,], type='b')
  plot(summary(datExpr_pca)$importance[3,], type='b')
  abline(h=var_explained, col='blue')
  
  # First component whose cumulative variance reaches the threshold (rows are
  # already ordered PC1, PC2, ..., so slice(1) is safer than sorting the IDs)
  last_pc = data.frame(summary(datExpr_pca)$importance[3,]) %>% rownames_to_column(var='ID') %>% 
    filter(.[[2]] >= var_explained) %>% slice(1)
  
  print(glue('Keeping top ', substr(last_pc$ID, 3, nchar(last_pc$ID)), ' components that explain ',
             var_explained*100, '% of the variance'))
  
  datExpr_top_pc = datExpr_pca$x %>% data.frame %>% dplyr::select(PC1:last_pc$ID)
  
  return(list('datExpr'=datExpr_top_pc, 'datMeta'=datMeta, 'pca_output'=datExpr_pca))
}

reduce_dim_output = reduce_dim_datExpr(datExpr, datMeta)

## Keeping top 11 components that explain 80% of the variance
datExpr_redDim = reduce_dim_output$datExpr
datMeta_redDim = reduce_dim_output$datMeta
pca_output = reduce_dim_output$pca_output

rm(datSeq, datProbes, reduce_dim_datExpr, reduce_dim_output, datExpr, datMeta)

Clustering

clusterings = list()

K-means Clustering

No recognisable best k in the elbow plot, so k=5 was chosen

set.seed(123)
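# Elbow method: total within-cluster sum of squares for k = 1 to 15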
wss = sapply(1:15, function(k) kmeans(datExpr_redDim, k, nstart=25)$tot.withinss)
plot(wss, type='b', main='K-Means Clustering')
best_k = 5
abline(v = best_k, col='blue')

datExpr_k_means = kmeans(datExpr_redDim, best_k, nstart=25)
clusterings[['km']] = datExpr_k_means$cluster

Hierarchical Clustering

Chose k=6 as the best number of clusters.

The clusters seem to separate ASD and control samples quite well, and there are no noticeable patterns regarding sex, age or brain region in any cluster.

Younger ASD samples seem to be more similar to control samples than older ASD samples (the pink cluster contains most of the youngest samples). The yellow cluster is made up of young ASD samples.

Colors:

  • Diagnosis: Blue=control, Green=ASD

  • Sex: Pink=Female, Blue=Male

  • Brain region: Pink=Frontal, Green=Temporal, Blue=Parietal, Purple=Occipital

  • Age: Purple=youngest, Yellow=oldest

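# Hierarchical clustering of samples on Euclidean distances (complete linkage by default)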
h_clusts = datExpr_redDim %>% dist %>% hclust %>% as.dendrogram
# h_clusts %>% plot
best_k = 6
clusterings[['hc']] = cutree(h_clusts, best_k)

# Build a named vector mapping each age in the metadata to a viridis colour
# (purple = youngest, yellow = oldest)
create_viridis_dict = function(){
  min_age = datMeta_redDim$Age %>% min
  max_age = datMeta_redDim$Age %>% max
  viridis_age_cols = viridis(max_age - min_age + 1)
  names(viridis_age_cols) = seq(min_age, max_age)
  
  return(viridis_age_cols)
}
viridis_age_cols = create_viridis_dict()

dend_meta = datMeta_redDim[match(gsub('X','',labels(h_clusts)), rownames(datMeta_redDim)),] %>% 
            mutate('Diagnosis' = ifelse(Diagnosis_=='CTL','#008080','#86b300'), # Blue control, Green ASD
                   'Sex' = ifelse(Sex=='F','#ff6666','#008ae6'),                # Pink Female, Blue Male
                   'Region' = case_when(Brain_lobe=='Frontal'~'#F8766D',        # ggplot defaults for 4 colours
                                      Brain_lobe=='Temporal'~'#7CAE00',
                                      Brain_lobe=='Parietal'~'#00BFC4',
                                      Brain_lobe=='Occipital'~'#C77CFF'),
                   'Age' = viridis_age_cols[as.character(Age)]) %>%            # Purple: young, Yellow: old
            dplyr::select(Age, Region, Sex, Diagnosis)
h_clusts %>% set('labels', rep('', nrow(datMeta_redDim))) %>% set('branches_k_color', k=best_k) %>% plot
colored_bars(colors=dend_meta)

Consensus Clustering

Samples are grouped into two big clusters, which are then split into 4 and 6 subclusters, respectively. The first split into two clusters is very clear; the subclusters are much less so.

Output plots are in the clustering_samples_03_15 folder.
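The consensus clustering code itself does not appear above; the following is a minimal sketch of how the two-level scheme could be reproduced with the ConsensusClusterPlus package. The object names (cc_output, cc_output_c1, cc_output_c2) and the cc_l1/cc_l2 entries of clusterings mirror the ones used later in this document, but the parameters (maxK, reps, seed) and the label offset are assumptions.

library(ConsensusClusterPlus)

# Level 1 (sketch, parameters assumed): consensus clustering of all samples;
# ConsensusClusterPlus expects items in columns, so the (samples x PCs) data
# frame is transposed
cc_output = datExpr_redDim %>% t %>% as.matrix %>%
            ConsensusClusterPlus(maxK=10, reps=50, seed=123,
                                 title='clustering_samples_03_15', plot='png')
clusterings[['cc_l1']] = cc_output[[2]]$consensusClass  # the two big clusters

# Level 2: subcluster each of the two big clusters separately
samples_c1 = names(clusterings[['cc_l1']])[clusterings[['cc_l1']]==1]
samples_c2 = names(clusterings[['cc_l1']])[clusterings[['cc_l1']]==2]
cc_output_c1 = datExpr_redDim[samples_c1,] %>% t %>% as.matrix %>%
               ConsensusClusterPlus(maxK=10, reps=50, seed=123,
                                    title='clustering_samples_03_15/c1', plot='png')
cc_output_c2 = datExpr_redDim[samples_c2,] %>% t %>% as.matrix %>%
               ConsensusClusterPlus(maxK=10, reps=50, seed=123,
                                    title='clustering_samples_03_15/c2', plot='png')

# Combine the 4 + 6 subclusters, offsetting the second set of labels so the
# two sets don't collide, and reorder to match the sample order
clusterings[['cc_l2']] = c(cc_output_c1[[4]]$consensusClass,
                           cc_output_c2[[6]]$consensusClass + 4)[rownames(datExpr_redDim)]

Consensus clustering repeatedly resamples the data and clusters each resample; pairs of samples that keep landing in the same cluster get high consensus values, which is what the plots written to the output folder visualise.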

Independent Component Analysis

Following this paper’s guidelines:

  1. Run PCA and keep enough components to explain 60% of the variance

  2. Run ICA with nbComp set to that same number of principal components, so the components can then be filtered

  3. Select components with kurtosis > 3

  4. Assign observations to clusters with FDR < 0.01 using the fdrtool package (a sketch of these steps follows below)
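The ICA code is omitted above; here is a minimal sketch of the four steps, assuming the fastICA package for step 2 (the guideline's nbComp parameter suggests a different ICA wrapper may have been used), moments::kurtosis for step 3, and fdrtool's local fdr for step 4. The object names (ICA_output, signals_w_kurtosis, ICA_clusters) mirror the ones removed later in this document, and the 'ICA_min' assignment rule is an assumption.

library(fastICA)
library(moments)
library(fdrtool)

# Step 1: number of principal components needed to explain 60% of the variance
# (pca_output comes from the dimensionality reduction section above)
n_comp = sum(summary(pca_output)$importance[3,] < 0.6) + 1

# Step 2: ICA with that number of components; this sketch runs on the
# PCA-reduced data, the original may have used the full expression matrix
set.seed(123)
ICA_output = fastICA(datExpr_redDim, n.comp = n_comp)
signals = data.frame(ICA_output$S, row.names = rownames(datExpr_redDim))

# Step 3: keep only the components with kurtosis > 3 (moments::kurtosis
# returns raw kurtosis, which is ~3 for a Gaussian)
signals_w_kurtosis = signals[, apply(signals, 2, kurtosis) > 3, drop=FALSE]

# Local fdr of every sample under each retained component (one fdrtool call,
# and one block of 'Step 1...4' console output, per component)
ICA_lfdr = apply(signals_w_kurtosis, 2, function(x) fdrtool(x, plot=F)$lfdr)

# Step 4: a sample belongs to a component's cluster when its local fdr < 0.01
ICA_clusters = data.frame(ICA_lfdr < 0.01)

# 'ICA_min' (used in the plots below) is assumed to assign each sample to the
# component where it attains its smallest local fdr
clusterings[['ICA_min']] = apply(ICA_lfdr, 1, which.min)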

## Step 1... determine cutoff point
## Step 2... estimate parameters of null distribution and eta0
## Step 3... compute p-values and estimate empirical PDF/CDF
## Step 4... compute q-values and local fdr

(the same four-step fdrtool output repeats 11 times, once per call; only one instance is shown)

Not a good method for clustering these samples because:

  1. ICA does not perform well with small sample sizes (see Figure 4 of this paper)

  2. It raises warnings: Warning in fdrtool(x, plot = F): There may be too few input test statistics for reliable FDR calculations!

  3. It leaves most of the observations without a cluster:

ICA_clusters %>% rowSums %>% table
## .
##  0  1  2 
## 60 23  3

WGCNA

The SFT.R.sq values jump strangely: they start high, drop very low in the middle of the power range, and then rise again at the end, where pickSoftThreshold selects a powerEstimate of 29.

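# Scale-free topology fit indices for candidate soft-thresholding powers 10 to 30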
best_power = datExpr_redDim %>% t %>% pickSoftThreshold(powerVector = seq(10, 30, by=1))
##    Power SFT.R.sq  slope truncated.R.sq mean.k. median.k. max.k.
## 1     10    0.672 -0.862         0.6930   2.620     1.870   8.69
## 2     11    0.749 -0.913         0.7880   2.200     1.520   7.64
## 3     12    0.756 -0.953         0.7800   1.880     1.250   6.76
## 4     13    0.790 -0.981         0.8080   1.620     1.040   6.02
## 5     14    0.738 -1.020         0.7310   1.400     0.828   5.39
## 6     15    0.153 -2.320        -0.0612   1.230     0.728   4.90
## 7     16    0.157 -2.340        -0.0547   1.090     0.662   4.48
## 8     17    0.157 -2.320        -0.0514   0.965     0.594   4.12
## 9     18    0.159 -2.370        -0.0482   0.863     0.543   3.80
## 10    19    0.160 -2.330        -0.0448   0.775     0.491   3.51
## 11    20    0.161 -2.300        -0.0384   0.700     0.447   3.26
## 12    21    0.163 -2.290        -0.0336   0.635     0.408   3.04
## 13    22    0.161 -2.210        -0.0428   0.579     0.371   2.84
## 14    23    0.162 -2.160        -0.0387   0.529     0.335   2.65
## 15    24    0.161 -2.120        -0.0358   0.486     0.303   2.49
## 16    25    0.160 -2.070        -0.0332   0.447     0.276   2.34
## 17    26    0.163 -2.080        -0.0298   0.412     0.250   2.20
## 18    27    0.165 -2.100        -0.0259   0.382     0.227   2.08
## 19    28    0.165 -2.060        -0.0210   0.354     0.206   1.96
## 20    29    0.883 -1.060         0.9670   0.329     0.183   1.86
## 21    30    0.896 -1.090         0.9610   0.306     0.160   1.76
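# After transposing, samples take the place of genes, so the detected modules are clusters of samples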
network = datExpr_redDim %>% t %>% blockwiseModules(power=best_power$powerEstimate, numericLabels=TRUE)
##      mergeCloseModules: less than two proper modules.
##       ..color levels are 0, 1
##       ..there is nothing to merge.
clusterings[['WGCNA']] = network$colors
names(clusterings[['WGCNA']]) = rownames(datExpr_redDim)

It finds a single cluster grouping 24 observations and leaves the remaining 62 without a cluster:

table(clusterings[['WGCNA']])
## 
##  0  1 
## 62 24

Gaussian Mixture Models with hard thresholding

The points don't seem to follow a Gaussian mixture no matter the number of clusters, so k=5 was chosen to match the best k from K-means, since the two methods are similar

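# BIC for Gaussian mixture models with up to 80 components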
n_clust = datExpr_redDim %>% Optimal_Clusters_GMM(max_clusters=80, criterion='BIC', plot_data=FALSE)
plot(n_clust, type='l', main='Bayesian Information Criterion to choose number of clusters')

best_k = 5 # copying k-means best_k
gmm = datExpr_redDim %>% GMM(best_k)
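# Hard thresholding: each sample is assigned to the component with the highest log-likelihood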
clusterings[['GMM']] = gmm$Log_likelihood %>% apply(1, which.max)

Plot of clusters with their centroids in gray

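# Append the centroids as unlabelled rows; their NA cluster labels render in grey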
gmm_points = rbind(datExpr_redDim, setNames(data.frame(gmm$centroids), names(datExpr_redDim)))
gmm_labels = c(clusterings[['GMM']], rep(NA, best_k)) %>% as.factor
ggplotly(gmm_points %>% ggplot(aes(x=PC1, y=PC2, color=gmm_labels)) + geom_point() + theme_minimal())
rm(wss, datExpr_k_means, h_clusts, cc_output, cc_output_c1, cc_output_c2, best_k, ICA_output, 
   ICA_clusters_names, signals_w_kurtosis, n_clust, gmm, gmm_points, gmm_labels, network, dend_meta, 
   best_power, c, viridis_age_cols, create_viridis_dict)

Compare clusterings

Using the Adjusted Rand Index (1 for identical partitions, about 0 for chance-level agreement):

clusters_plus_phenotype = clusterings
clusters_plus_phenotype[['Subject']] = datMeta_redDim$Subject_ID
clusters_plus_phenotype[['ASD']] = datMeta_redDim$Diagnosis_
clusters_plus_phenotype[['Region']] = datMeta_redDim$Brain_lobe
clusters_plus_phenotype[['Sex']] = datMeta_redDim$Sex
clusters_plus_phenotype[['Age']] = datMeta_redDim$Age

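# Pairwise ARI over the upper triangle (the diagonal compares each clustering with itself)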
cluster_sim = data.frame(matrix(nrow = length(clusters_plus_phenotype), ncol = length(clusters_plus_phenotype)))
for(i in 1:(length(clusters_plus_phenotype))){
  cluster1 = as.factor(clusters_plus_phenotype[[i]])
  for(j in (i):length(clusters_plus_phenotype)){
    cluster2 = as.factor(clusters_plus_phenotype[[j]])
    cluster_sim[i,j] = adj.rand.index(cluster1, cluster2)
  }
}
colnames(cluster_sim) = names(clusters_plus_phenotype)
rownames(cluster_sim) = colnames(cluster_sim)

cluster_sim = cluster_sim %>% as.matrix %>% round(2)
heatmap.2(x = cluster_sim, Rowv = FALSE, Colv = FALSE, dendrogram = 'none', 
          cellnote = cluster_sim, notecol = 'black', trace = 'none', key = FALSE, 
          cexRow = 1, cexCol = 1, margins = c(7,7))

rm(i, j, cluster1, cluster2, clusters_plus_phenotype, cluster_sim)

Scatter plots

ICA cluster associations by sample

create_2D_plot = function(cat_var){
 ggplotly(plot_points %>% ggplot(aes_string(x='PC1', y='PC2', color=cat_var)) + 
          geom_point() + theme_minimal() + 
          xlab(paste0('PC1 (', round(summary(pca_output)$importance[2,1]*100,2),'%)')) +
          ylab(paste0('PC2 (', round(summary(pca_output)$importance[2,2]*100,2),'%)')))
}
create_3D_plot = function(cat_var){
  plot_points %>% plot_ly(x=~PC1, y=~PC2, z=~PC3) %>% add_markers(color=plot_points[,cat_var], size=1) %>% 
    layout(title = glue('Samples coloured by ', cat_var),
           scene = list(xaxis=list(title=glue('PC1 (',round(summary(pca_output)$importance[2,1]*100,2),'%)')),
                        yaxis=list(title=glue('PC2 (',round(summary(pca_output)$importance[2,2]*100,2),'%)')),
                        zaxis=list(title=glue('PC3 (',round(summary(pca_output)$importance[2,3]*100,2),'%)'))))  
}

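# One row per sample: top 3 PCs plus cluster assignments and phenotype variables for plotting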
plot_points = datExpr_redDim %>% data.frame() %>% dplyr::select(PC1:PC3) %>%
              mutate(ID = rownames(.), subject_ID = datMeta_redDim$Subject_ID,
                km_clust = as.factor(clusterings[['km']]),       hc_clust = as.factor(clusterings[['hc']]),
                cc_l1_clust = as.factor(clusterings[['cc_l1']]), cc_l2_clust = as.factor(clusterings[['cc_l2']]),
                ica_clust = as.factor(clusterings[['ICA_min']]), n_ica_clust = as.factor(rowSums(ICA_clusters)),
                gmm_clust = as.factor(clusterings[['GMM']]),     wgcna_clust = as.factor(clusterings[['WGCNA']]),
                sex = as.factor(datMeta_redDim$Sex),             region = as.factor(datMeta_redDim$Brain_lobe), 
                diagnosis = as.factor(datMeta_redDim$Diagnosis_), age = datMeta_redDim$Age)

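# Tile plot of the sample-to-component membership matrix (cl_sum counts each sample's clusters)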
ICA_clusters %>% mutate(cl_sum=rowSums(.)) %>% as.matrix %>% melt %>% ggplot(aes(Var2,Var1)) + 
  geom_tile(aes(fill=value)) + xlab('Clusters') + ylab('Samples') + 
  theme_minimal() + theme(axis.text.x=element_blank(), axis.ticks.x=element_blank()) + coord_flip()

2D plots of clusterings

create_2D_plot('km_clust')
create_2D_plot('hc_clust')
create_2D_plot('cc_l1_clust')
create_2D_plot('cc_l2_clust')
create_2D_plot('ica_clust')
create_2D_plot('gmm_clust')
create_2D_plot('wgcna_clust')

2D plots of Phenotypes

create_2D_plot('diagnosis')
create_2D_plot('region')
create_2D_plot('sex')
create_2D_plot('age')

3D plots

create_3D_plot('ica_clust')